import os
from glob import glob

import numpy as np

from keras.utils import np_utils
from sklearn.datasets import load_files
def load_dataset(path, num_classes=133):
    """Load image file paths and one-hot labels from a class-per-folder tree.

    Parameters
    ----------
    path : str
        Root directory; each subfolder is one class (as read by
        sklearn's ``load_files``).
    num_classes : int, optional
        Width of the one-hot label matrix (default 133, the number of
        dog breeds in this dataset).

    Returns
    -------
    (np.ndarray, np.ndarray)
        Array of image file paths and the one-hot encoded label matrix.
    """
    data = load_files(path)
    images = np.array(data['filenames'])
    labels = np_utils.to_categorical(np.array(data['target']), num_classes)
    return images, labels
# Load the train / validation / test splits from disk.
train_images, train_labels = load_dataset('dogImages/train')
valid_images, valid_labels = load_dataset('dogImages/valid')
test_images, test_labels = load_dataset('dogImages/test')
# Sum the lengths directly instead of hstack-ing three arrays just to count
# them; use %d consistently for the integer counts.
total_images = len(train_images) + len(valid_images) + len(test_images)
print('There are %d total dog images.\n' % total_images)
print('There are %d training dog images.' % len(train_images))
print('There are %d validation dog images.' % len(valid_images))
print('There are %d test dog images.' % len(test_images))
print(len(train_labels))
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# Inspect one training image: report its dimensions and display it.
# NOTE(review): cv2.imread returns BGR channel order, so colors here
# will look swapped in matplotlib.
img = cv2.imread(train_images[1])
height, width, channels = img.shape
print(height, width, channels)
plt.imshow(img)
plt.show()
# Collect the height/width of every training image and plot their
# distributions (useful for choosing a resize target).
dims = [cv2.imread(path).shape[:2] for path in train_images]
height_list = [h for h, _ in dims]
width_list = [w for _, w in dims]
plt.hist([height_list, width_list], color=['blue', 'grey'], label=['height', 'width'])
plt.ylabel('nb of images')
plt.xlabel('pixel')
plt.title('Image dimensions')
print("Average height : %f" % np.mean(height_list))
print("Average width : %f" % np.mean(width_list))
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
print("The breeds are")
print()
print(dog_names)
print(len(dog_names))
import seaborn as sns

# Number of training images per breed, in the same sorted order as dog_names.
# (Replaces a manual counter loop with a stray semicolon.)
count = [len(glob(item + '*')) for item in sorted(glob("dogImages/train/*/"))]
print(count)
sns.set(color_codes=True)
plt.rcParams['figure.figsize'] = (60.0, 30.0)
sns.barplot(dog_names, count, color="lightgreen")
plt.xlabel("dog_names")
plt.ylabel("count")
# Read the first image of each breed (the display calls are commented out,
# so this only exercises the read path).  Fixes the accidental double
# assignment `img = img=cv2.imread(i)` and the count/break contortion.
plt.rcParams['figure.figsize'] = (20.0, 30.0)
for item in sorted(glob("dogImages/train/*/")):
    files = glob(item + '*')
    if files:
        img = cv2.imread(files[0])
        #plt.imshow(img)
        #plt.show()
from keras.preprocessing import image
def tensor_4d(img_path):
    """Load the image at img_path as a (1, 224, 224, 3) array for Keras."""
    loaded = image.load_img(img_path, target_size=(224, 224))
    array_3d = image.img_to_array(loaded)
    # Prepend the batch axis expected by model.predict.
    return array_3d[np.newaxis, ...]
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions
import pickle

# Mapping from ImageNet class index to breed name, produced offline.
# NOTE(review): pickle.load on a local project file — fine here, but never
# unpickle untrusted data.
with open("data.pkl", "rb") as pickle_in:  # 'with' closes the file (was leaked)
    dog_breed = pickle.load(pickle_in)

# Full VGG16 classifier pre-trained on ImageNet.
model = VGG16(weights='imagenet')

def prediction_labels(img_path):
    """Return the breed name the ImageNet VGG16 model predicts for img_path."""
    img = preprocess_input(tensor_4d(img_path))
    output = model.predict(img)
    return dog_breed[np.argmax(output)]
# Sanity-check the ImageNet VGG16 predictions on the first 200 training
# images: print the predicted label next to the ground-truth breed name.
# (Replaces manual i/k counters with enumerate + argmax; drops the unused
# num_dog_faces variable.)
training_set = train_images[:200]
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    print(i)
    print(answer)
    # train_labels is one-hot encoded; argmax recovers the breed index.
    print(dog_names[np.argmax(train_labels[i])])
# Same sanity check on the first 200 test images.
training_set = test_images[:200]
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    print(i)
    print(answer)
    # test_labels is one-hot encoded; argmax recovers the breed index.
    print(dog_names[np.argmax(test_labels[i])])
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input, decode_predictions
# Swap the backbone: ResNet50 pre-trained on ImageNet.
model = ResNet50(weights='imagenet')

def prediction_labels(img_path):
    """Return the breed name the ImageNet ResNet50 model predicts for img_path."""
    batch = preprocess_input(tensor_4d(img_path))
    scores = model.predict(batch)
    return dog_breed[np.argmax(scores)]
# ResNet50 sanity check on the first 200 training images.
training_set = train_images[:200]
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    print(i)
    print(answer)
    # train_labels is one-hot encoded; argmax recovers the breed index.
    print(dog_names[np.argmax(train_labels[i])])
# ResNet50 sanity check on the first 200 test images.
training_set = test_images[:200]
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    print(i)
    print(answer)
    # test_labels is one-hot encoded; argmax recovers the breed index.
    print(dog_names[np.argmax(test_labels[i])])
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input, decode_predictions
# Swap the backbone: InceptionV3 pre-trained on ImageNet.
model = InceptionV3(weights='imagenet')

def prediction_labels(img_path):
    """Return the breed name the ImageNet InceptionV3 model predicts for img_path."""
    batch = preprocess_input(tensor_4d(img_path))
    scores = model.predict(batch)
    return dog_breed[np.argmax(scores)]
# InceptionV3 sanity check on the first 200 training images.
training_set = train_images[:200]
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    print(i)
    print(answer)
    # train_labels is one-hot encoded; argmax recovers the breed index.
    print(dog_names[np.argmax(train_labels[i])])
# InceptionV3 check on the first 200 test images, also counting how often
# the true breed name appears in the predicted label string.
training_set = test_images[:200]
number = 0
for i, face in enumerate(training_set):
    answer = prediction_labels(face)
    # test_labels is one-hot encoded; argmax recovers the breed index.
    true_breed = dog_names[np.argmax(test_labels[i])]
    print(i)
    print(answer)
    if true_breed in answer:
        number = number + 1
    print(true_breed)
print(number)
# Pre-computed VGG16 bottleneck features for each dataset split.
bottleneck_features_vgg16 = np.load('bottleneck_features/DogVGG16Data.npz')
train_VGG16, valid_VGG16, test_VGG16 = (
    bottleneck_features_vgg16[split] for split in ('train', 'valid', 'test')
)
# The model uses the pre-trained VGG-16 network as a feature extractor: the
# last convolutional output of VGG-16 is fed as input to our model.
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
# Transfer-learning head on top of the VGG16 bottleneck features:
# GAP -> Dropout(0.4) -> Dense(500, relu) -> Dropout(0.5) -> Dense(133, softmax).
VGG16_model = Sequential([
    GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]),
    Dropout(0.4),
    Dense(500, activation='relu'),
    Dropout(0.5),
    Dense(133, activation='softmax'),
])
VGG16_model.summary()
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                    metrics=['accuracy'])
# Persist only the weights that achieve the best validation loss.
Checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
                               verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_labels,
                validation_data=(valid_VGG16, valid_labels),
                epochs=20, batch_size=20, callbacks=[Checkpointer], verbose=1)
# The log loss at the last epoch (epoch 20), inferred from the training
# output above, is 2.58854.
# Restore the best checkpoint and measure top-1 accuracy on the test split.
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
# Predict the whole split in one batched call instead of one image at a time.
VGG16_predictions = np.argmax(VGG16_model.predict(test_VGG16), axis=1)
# report test accuracy
test_accuracy = 100 * np.sum(VGG16_predictions == np.argmax(test_labels, axis=1)) / len(VGG16_predictions)
print('Test accuracy: %.2f%%' % test_accuracy)
from extract_bottleneck_features import *
def VGG16_prediction(img_path):
    """Predict the dog breed for img_path using the VGG16 transfer model."""
    features = extract_VGG16(tensor_4d(img_path))
    probabilities = VGG16_model.predict(features)
    return dog_names[np.argmax(probabilities)]
def detector(img_path):
    """Display the image at img_path and return the predicted breed name."""
    breed = VGG16_prediction(img_path)
    bgr = cv2.imread(img_path)
    # OpenCV loads BGR; convert so matplotlib shows correct colors.
    plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    plt.show()
    return breed
# Run the VGG16 detector over every hand-picked image in test_images/.
test_images_files = np.array(glob("test_images/*"))
print(test_images_files)
for path in test_images_files:
    print("actual label:")
    print(path)
    print("predicted label:")
    result = detector(path)
    print(result)
# Pre-computed ResNet50 bottleneck features for each dataset split.
bottleneck_features_resnet50 = np.load('bottleneck_features/DogResnet50Data.npz')
train_RESNET50, valid_RESNET50, test_RESNET50 = (
    bottleneck_features_resnet50[split] for split in ('train', 'valid', 'test')
)
# The model uses the pre-trained ResNet-50 network as a feature extractor: the
# last convolutional output of ResNet-50 is fed as input to our model.
# Transfer-learning head on top of the ResNet50 bottleneck features:
# GAP -> Dropout(0.4) -> Dense(500, relu) -> Dropout(0.5) -> Dense(133, softmax).
RESNET50_model = Sequential([
    GlobalAveragePooling2D(input_shape=train_RESNET50.shape[1:]),
    Dropout(0.4),
    Dense(500, activation='relu'),
    Dropout(0.5),
    Dense(133, activation='softmax'),
])
RESNET50_model.summary()
RESNET50_model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                       metrics=['accuracy'])
# Persist only the weights that achieve the best validation loss.
Checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.RESNET50.hdf5',
                               verbose=1, save_best_only=True)
RESNET50_model.fit(train_RESNET50, train_labels,
                   validation_data=(valid_RESNET50, valid_labels),
                   epochs=20, batch_size=20, callbacks=[Checkpointer], verbose=1)
# The log loss at the last epoch (epoch 20), inferred from the training
# output above, is 0.5902.
# Restore the best checkpoint and measure top-1 accuracy on the test split.
RESNET50_model.load_weights('saved_models/weights.best.RESNET50.hdf5')
# Predict the whole split in one batched call instead of one image at a time.
RESNET50_predictions = np.argmax(RESNET50_model.predict(test_RESNET50), axis=1)
test_accuracy = 100 * np.sum(RESNET50_predictions == np.argmax(test_labels, axis=1)) / len(RESNET50_predictions)
print('Test accuracy: %.2f%%' % test_accuracy)
from extract_bottleneck_features import *
def RESNET50_prediction(img_path):
    """Predict the dog breed for img_path using the ResNet50 transfer model."""
    features = extract_Resnet50(tensor_4d(img_path))
    probabilities = RESNET50_model.predict(features)
    return dog_names[np.argmax(probabilities)]
def detector(img_path):
    """Display the image at img_path and return the predicted breed name."""
    breed = RESNET50_prediction(img_path)
    bgr = cv2.imread(img_path)
    # OpenCV loads BGR; convert so matplotlib shows correct colors.
    plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    plt.show()
    return breed
# Run the ResNet50 detector over every hand-picked image in test_images/.
test_images_files = np.array(glob("test_images/*"))
print(test_images_files)
for path in test_images_files:
    print("actual label:")
    print(path)
    print("predicted label:")
    result = detector(path)
    print(result)
# Pre-computed InceptionV3 bottleneck features for each dataset split.
bottleneck_features_INCEPTION = np.load('bottleneck_features/DogInceptionV3Data.npz')
train_INCEPTION, valid_INCEPTION, test_INCEPTION = (
    bottleneck_features_INCEPTION[split] for split in ('train', 'valid', 'test')
)
# Transfer-learning head on top of the InceptionV3 bottleneck features:
# GAP -> Dropout(0.4) -> Dense(500, relu) -> Dropout(0.5) -> Dense(133, softmax).
INCEPTION_model = Sequential([
    GlobalAveragePooling2D(input_shape=train_INCEPTION.shape[1:]),
    Dropout(0.4),
    Dense(500, activation='relu'),
    Dropout(0.5),
    Dense(133, activation='softmax'),
])
INCEPTION_model.summary()
INCEPTION_model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                        metrics=['accuracy'])
# Persist only the weights that achieve the best validation loss.
Checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.INCEPTION.hdf5',
                               verbose=1, save_best_only=True)
INCEPTION_model.fit(train_INCEPTION, train_labels,
                    validation_data=(valid_INCEPTION, valid_labels),
                    epochs=20, batch_size=20, callbacks=[Checkpointer], verbose=1)
# Restore the best checkpoint and measure top-1 accuracy on the test split.
INCEPTION_model.load_weights('saved_models/weights.best.INCEPTION.hdf5')
# Predict the whole split in one batched call instead of one image at a time.
INCEPTION_predictions = np.argmax(INCEPTION_model.predict(test_INCEPTION), axis=1)
test_accuracy = 100 * np.sum(INCEPTION_predictions == np.argmax(test_labels, axis=1)) / len(INCEPTION_predictions)
print('Test accuracy: %.2f%%' % test_accuracy)
from extract_bottleneck_features import *
def INCEPTION_prediction(img_path):
    """Predict the dog breed for img_path using the InceptionV3 transfer model."""
    features = extract_InceptionV3(tensor_4d(img_path))
    probabilities = INCEPTION_model.predict(features)
    return dog_names[np.argmax(probabilities)]
def detector(img_path):
    """Display the image at img_path and return the predicted breed name."""
    breed = INCEPTION_prediction(img_path)
    bgr = cv2.imread(img_path)
    # OpenCV loads BGR; convert so matplotlib shows correct colors.
    plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    plt.show()
    return breed
# Run the InceptionV3 detector over every hand-picked image in test_images/.
test_images_files = np.array(glob("test_images/*"))
print(test_images_files)
for path in test_images_files:
    print("actual label:")
    print(path)
    print("predicted label:")
    result = detector(path)
    print(result)